This series is taught in a Colab environment. In this article I cover object-tracking techniques for image recognition; following the schedule, each week I record a different image-recognition method, proceeding in a set order.
This is the recap post for this topic: it merges the code that was explained separately over the previous three days into one place, to make studying easier. Every later topic will also get a recap like this. The article is on the long side; thanks for bearing with me!
Dataset preparation:
The dataset used here consists of toy nuts and bolts that I photographed myself, i.e., a local dataset. Since we are working in Colab, the dataset must first be uploaded to Google Drive so that Colab can read it.
The dataset's folder structure is as follows:
As shown below:
Folder contents:
Folders such as Hexagon_1 contain the photos to be recognized:
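For reference, tf.keras.utils.image_dataset_from_directory infers the class labels from the subfolder names, so the layout should look roughly like this (folder and file names other than Hexagon_1 are placeholders based on the screenshots):

train_data/
    Hexagon_1/
        photo_001.jpg
        photo_002.jpg
        ...
    Hexagon_2/
        ...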
Mounting Google Drive:
from google.colab import drive
drive.mount('/content/drive')
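After running this cell and authorizing access, your Drive appears under /content/drive. A quick sanity check that the upload worked (a sketch; the folder names assume the layout used in this article):

import os
print(os.listdir('/content/drive/MyDrive'))  # should include train_data, test_data and model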
Model training:
Once Drive is mounted, we can start training the model. A later article will go deeper into the model itself and how the layers are stacked. After training, we save the model to Google Drive so it can be loaded directly next time.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pathlib
import numpy as np
import os
import PIL.Image as Image
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'  # optional: enable XLA devices
data_dir = pathlib.Path("/content/drive/MyDrive/train_data")  # one subfolder per class
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=3,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,  # must match the training subset's split, or the two sets overlap
    subset="validation",
    seed=3,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
AUTOTUNE = tf.data.AUTOTUNE
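# AUTOTUNE is meant for the tf.data input pipeline. An optional use of it,
# following the standard Keras image-classification tutorial, is to cache
# and prefetch batches so data loading overlaps with training:
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)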
num_classes = len(class_names)
data_augmentation = keras.Sequential(
    [
        layers.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
        layers.RandomRotation(0.1),
        layers.RandomZoom(0.1),
    ]
)
model = tf.keras.Sequential([
    data_augmentation,
    layers.Conv2D(64, 3, activation='relu'),  # input shape is set by data_augmentation above
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(16, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(8, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(8, activation='relu'),
    layers.Dense(num_classes)  # outputs logits; softmax is applied at prediction time
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
#model = tf.keras.models.load_model('../h5/model.h5')  # uncomment to continue from a previously saved model
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=100
)
model.summary()
model.save('/content/drive/MyDrive/model/model_1.h5')  # Keras HDF5 format; reload with tf.keras.models.load_model
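Training for a fixed 100 epochs can easily overfit a small, self-collected dataset. A minimal sketch of early stopping (the patience value is an assumption; tune it for your data):

early_stop = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss', patience=10, restore_best_weights=True)
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=100,
    callbacks=[early_stop]  # stop once val_loss has not improved for 10 epochs
)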
Results after training:
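A figure like this can be reproduced from the recorded history object; a short matplotlib sketch, assuming the figure plots the accuracy curves:

import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'], label='training accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()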
Model loading and prediction:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pathlib
import numpy as np
import os
import PIL.Image as Image
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
data_dir = pathlib.Path("/content/drive/MyDrive/train_data")
batch_size = 32
img_height = 180
img_width = 180
# The datasets are rebuilt here only so we can recover class_names below.
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=3,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,  # must match the training subset's split
    subset="validation",
    seed=3,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
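# A lighter alternative that avoids building the datasets at all
# (a sketch, assuming one subfolder per class as in train_data):
# class_names = sorted(p.name for p in data_dir.iterdir() if p.is_dir())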
reload_model = tf.keras.models.load_model('/content/drive/MyDrive/model/model_1.h5')
reload_model.summary()
img = Image.open('/content/drive/MyDrive/test_data/test_image.jpg').convert('RGB')
img = img.resize((img_height, img_width))  # resize to the size the model was trained on
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)  # add a batch dimension
predictions = reload_model.predict(img_array)
score = tf.nn.softmax(predictions[0])  # the model outputs logits, so convert to probabilities
print("This image most likely belongs to {} with a {:.2f} percent confidence."
      .format(class_names[np.argmax(score)], 100 * np.max(score)))
Actual prediction result:
Full code:
from google.colab import drive
drive.mount('/content/drive')
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pathlib
import numpy as np
import os
import PIL.Image as Image
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
data_dir = pathlib.Path("/content/drive/MyDrive/train_data")
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=3,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,  # must match the training subset's split
    subset="validation",
    seed=3,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
AUTOTUNE = tf.data.AUTOTUNE
num_classes = len(class_names)
data_augmentation = keras.Sequential(
    [
        layers.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
        layers.RandomRotation(0.1),
        layers.RandomZoom(0.1),
    ]
)
model = tf.keras.Sequential([
    data_augmentation,
    layers.Conv2D(64, 3, activation='relu'),  # input shape is set by data_augmentation above
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(16, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(8, 3, activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(8, activation='relu'),
    layers.Dense(num_classes)  # outputs logits; softmax is applied at prediction time
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
#model = tf.keras.models.load_model('../h5/model.h5')  # uncomment to continue from a previously saved model
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=100
)
model.summary()
model.save('/content/drive/MyDrive/model/model_1.h5')  # Keras HDF5 format
reload_model = tf.keras.models.load_model('/content/drive/MyDrive/model/model_1.h5')
reload_model.summary()
img = Image.open('/content/drive/MyDrive/test_data/test_image.jpg').convert('RGB')
img = img.resize((img_height, img_width))  # resize to the size the model was trained on
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0)  # add a batch dimension
predictions = reload_model.predict(img_array)
score = tf.nn.softmax(predictions[0])  # the model outputs logits, so convert to probabilities
print("This image most likely belongs to {} with a {:.2f} percent confidence."
      .format(class_names[np.argmax(score)], 100 * np.max(score)))
Shared folder for test_data:
https://drive.google.com/drive/folders/1p1EFvg6nQWeArdrz9tUMxNaJ-Xx81Fph?usp=sharing
Shared folder for train_data:
https://drive.google.com/drive/folders/1IvmcihJgYUH9YAc8ccctyN4QHVRs-Ff6?usp=drive_link
Article topic list:
Bold titles mark additionally updated articles.